# Notebook front matter: embed a JS snippet that toggles visibility of the
# notebook's input (code) cells; the HTML string below is rendered verbatim.
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this IPython notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')
# --- rpy2 (Python <-> R bridge) setup ---
import rpy2
print(rpy2.__version__)
import rpy2.rinterface
from rpy2.robjects.packages import importr
import rpy2.robjects.packages as rpackages
#%load_ext rpy2.ipython
import rpy2
import rpy2.robjects.packages as rpackages
import pandas as pd
# import rpy2's package module
# import R's utility package
# NOTE(review): hard-coded, machine-specific R library path.
utils = rpackages.importr('utils',lib_loc="C:/Program Files/R/R-3.5.0/library")
#importr("ggplot2", lib_loc= Where_is_R)
#utils.chooseCRANmirror(ind=1) # select the first mirror in the list
#base=rpackages.importr('base')
# select a mirror for R packages
#import R's "base" package
# Fetch RWinOut (fixes %%R magic output capture on Windows) and load it.
!curl -O "https://raw.githubusercontent.com/vitorcurtis/RWinOut/master/RWinOut.py"
%load_ext RWinOut
def showImagesMatrix(list_of_files,hSize, wSize,col=10):
fig = figure( figsize=(wSize, hSize))
number_of_files = len(list_of_files)
row = number_of_files/col
if (number_of_files%col != 0):
row += 1
for i in range(number_of_files):
a=fig.add_subplot(row,col,i+1)
image = imread(mypath+'/'+list_of_files[i])
imshow(image,cmap='Greys_r')
axis('off')
from IPython.display import HTML
#HTML('''<script>
#code_show=true;
#function code_toggle() {
# if (code_show){
#$('div.input').hide();
#} else {
#$('div.input').show();
#}
#code_show = !code_show!curl -O "https://raw.githubusercontent.com/vitorcurtis/RWinOut/master/RWinOut.py"
%load_ext RWinOu
#}
#$( document ).ready(code_toggle);
#</script>
#<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
!curl -O "https://raw.githubusercontent.com/vitorcurtis/RWinOut/master/RWinOut.py"
%load_ext RWinOut
# NOTE(review): this re-defines showImagesMatrix identically to the cell
# above; kept so the notebook also runs when started from this point.
def showImagesMatrix(list_of_files, hSize, wSize, col=10):
    """Display the images named in list_of_files (resolved against the
    global `mypath`) in a grid with `col` columns.

    Bug fix: `number_of_files / col` is float division in Python 3 and
    `fig.add_subplot` requires integer row/column counts; use ceiling
    integer division instead.
    """
    fig = figure(figsize=(wSize, hSize))
    number_of_files = len(list_of_files)
    # Ceiling division: enough rows to hold every image.
    row = -(-number_of_files // col)
    for i in range(number_of_files):
        fig.add_subplot(row, col, i + 1)  # subplot index is 1-based
        image = imread(mypath + '/' + list_of_files[i])
        imshow(image, cmap='Greys_r')
        axis('off')
from IPython.display import Image
# Title figure for the notebook.
PATH = "E:/FS_meeting/FiguresMisc/"
Image(filename = PATH + "Apps_Long.jpg", width=1000, height=1000)
# User R library path used by the importr() calls below.
Where_is_R="C:/Users/zacha/Documents/R/win-library/3.5"
#import rpy2's package mod
# Spatial/data packages loaded into the embedded R session.
importr("rgdal", lib_loc= Where_is_R)
importr("raster", lib_loc= Where_is_R)
importr("sf", lib_loc= Where_is_R)
importr("magrittr", lib_loc= Where_is_R)
Most of the nitrogen, carbon and lignin parameters were started from existing papers and the TRY database. The file NECN_Folder contains the data taken from each paper and the database; each TRY entry is linked to an individual paper in the TRY files. Some species were adapted from existing papers where the formula was assumed to have a carbon ratio of 1/2; the formula is therefore C:N = .5/(g nitrogen/g total). Species that were not included in either the papers or the plant database were researched individually, or given a value based on a genus/family/order similarity to a species in this or another LANDIS paper. I started with values from previous LANDIS-II models and supplemented them with data from the TRY database. TRY data was queried by species and by trait; in the event of multiple returns, values were averaged.
TRY Data: Kattge, J., Bönisch, G., Günther, A., Wright, I., Zanne, A., Wirth, C., Reich, P.B. and the TRY Consortium (2012) TRY - Categorical Traits Dataset. Data from: TRY - a global database of plant traits. TRY File Archive https://www.try-db.org/TryWeb/Data.php#3
%%R
# R-side working directory for the NECN biochemistry data.
w_dir<-"E:/FS_meeting/NECN_Biochem/"
setwd(w_dir)
# Python-side copy of the same path; load the processed TRY trait table.
w_dir="E:/FS_meeting/NECN_Biochem/"
Try=pd.read_csv(w_dir+"Try_Proc.csv")
#Try.head(n=10)
print("Example Acer rubrum")
Try[Try.sp=="Acer rubrum"]  # example: all trait records for one species
Try[Try.trait=="Leaf nitrogen (N) content per leaf dry mass"]  # example: one trait across species
print("Example:Leaf Nitrogen")
Try.head(n=20)
These values were used to update the N:C and lignin parameters; the records can be found in Cleanedup NECN documentation.csv. Values were also updated using additional records, primarily from Davis 2009.
# Figure: Davis 2009 table used to update N:C and lignin values.
PATH = "E:/FS_meeting/FiguresMisc/"
Image(filename = PATH + "Davis2009.PNG", width=1000, height=1000)
Values that were still missing were assessed by similarity, first through genus, then family, and in a couple of cases order. These decisions can be found in the document:
# R packages for the functional-group clustering analysis.
importr("MASS", lib_loc= Where_is_R)
importr("meanShiftR", lib_loc= Where_is_R)
importr("ggfortify", lib_loc= Where_is_R)
importr("ggplot2", lib_loc= Where_is_R)
%%R
library(meanShiftR)
library(ggfortify)
library(ggplot2)
#Set up the work drive
w_dir<-"E:/FS_meeting/Functional_Groups/"
setwd(w_dir)
To assess the functional groups I looked at the Ty Wilson maps for tree range and coupled them with elevation levels and 30-yr normals for precipitation, temperature (min, mean, max), and VPD. I then used those to find the bounds of each species' range (maximums and minimums of temperature, precipitation, elevation, and VPD). This was then used with a mean-shift algorithm to find the closest groups based on multiple criteria, and combinations were recombined to find groups. These groups were then plotted alongside two PCAs to display the grouping.
%%R
# Load the species climate/elevation range table, drop the CSV index
# column, and subset to the reduced variable set used for clustering.
read<-read.csv(paste(w_dir,"output.csv",sep=""),stringsAsFactors=FALSE)
Sampledf<-read[,-1]
# Full candidate variable list (kept for reference) ...
Variableset<-c("sp","Type","Maxtemp","Mintemp","Meantemp","MinPPT","Minelevation","Maxelevation","MinVPD")
# ... and the subset actually used (no elevation minimum).
Variableset2<-c("sp","Type","Meantemp","MinPPT","MinVPD","Maxelevation")#Noelevation
Sampledf<-Sampledf[,Variableset2]
This is the conifer work
%%R
##Setting up the data for the bms.clustering
# Subset to one clade, z-score each of the four variables, and assemble a
# species-by-variable matrix for meanShift().
Clade="Conifer"
OneClade<-Sampledf[Sampledf[,2]==Clade,]
tree.labels<-OneClade[,c("sp")]
OneCladeproc<-((OneClade[c(-1,-2)]))
# Height = number of species (rows), Width = number of variables (cols).
Height<-dim(OneCladeproc)[1]
Width<-dim(OneCladeproc)[2]
# Column-major flatten. NOTE(review): this masks base::t(); function calls
# to t() below still resolve to the transpose, but it is confusing.
t<-as.numeric(data.matrix(OneCladeproc))
#seeds.data<-matrix(t,Width,Height)
# Standardize each variable (slices of the column-major flatten).
row1<-t[c(1:Height)]
row1<- (row1-mean(row1))/sd(row1)
row2<-t[c((1+1*Height):(2*Height))]
row2<- (row2-mean(row2))/sd(row2)
row3<-t[c((1+2*Height):(3*Height))]
row3<- (row3-mean(row3))/sd(row3)
row4<-t[c((1+3*Height):(4*Height))]
row4<- (row4-mean(row4))/sd(row4)
# Stack the standardized variables and transpose back to species x variable.
seeds_data<-rbind(row1,row2,row3,row4)
seeds_data<-t(seeds_data)
names<-colnames(Sampledf)[c(-1,-2)]
%%R
# Run mean-shift clustering on the standardized conifer matrix, then show
# the groups in pairwise scatterplots and a PCA biplot.
b=.8   # bandwidth, tuned by hand until 3-4 groups emerge
b=c(rep(b,ncol(seeds_data)))
iter<-10000
MS_Conif<- meanShift(
seeds_data,
seeds_data,
bandwidth=b,
alpha=0,
iterations = iter
)
# Cluster assignment per species.
MS_Conif_assign <- MS_Conif$assignment
(print(MS_Conif_assign))
# value
MS_Conif_value <- MS_Conif$value
(print(MS_Conif_value))
## Mode values labeled with their variable names, for export.
MSOUTPUT<-rbind(Variableset2[c(-1,-2)],MS_Conif_value)
par( mfrow=c( 1,2) )
# Pairwise scatterplots of consecutive variables, colored by cluster.
# NOTE(review): ncol(seeds_data) in the title is the number of variables,
# not the number of groups found.
for(i in 1:(length(names)-1)){
#jpeg(paste(Clade,i,".jpg",sep=""))
plot( seeds_data[,(i)], seeds_data[,(i+1)], col=MS_Conif$assignment,
xlab=names[(i)], ylab=names[(i+1)], main=paste(Clade,ncol(seeds_data),"Groups",sep=""),
cex=0.65, pch=16 )
#dev.off()
}
ms.labels6 <- MS_Conif$assignment
print( ms.labels6 )
ms.modes6 <- MS_Conif$value
print(ms.modes6)
# Attach cluster labels to the species table and build a plotting frame.
B<-cbind(OneClade,ms.labels6)
seeds.plot<-data.frame((seeds_data))
colnames(seeds.plot)<-names
seeds.plot<-as.data.frame(cbind(seeds.plot,B$sp,as.character(B$ms.labels6)))
colnames(seeds.plot)[6]<-"MS"
colnames(seeds.plot)[5]<-"sp"
seeds.plot$MS<-as.factor(seeds.plot$MS)
seeds.plot$sp<-as.factor(seeds.plot$sp)
###Perform and plot PCA colored by mean-shift group
PCA1<-prcomp(seeds.plot[,1:4])
#jpeg(paste(Clade,"PCA.jpg",sep=""))
autoplot(PCA1,data=seeds.plot,colour='MS',
label=TRUE,loadings=TRUE,
loading.label=TRUE,loadings.label.size=10,
frame=TRUE
)
#dev.off()
Hardwoods
%%R
# Hardwoods: same pipeline as the conifer block above (reload the range
# table, subset, standardize, mean-shift).
read<-read.csv(paste(w_dir,"output.csv",sep=""),stringsAsFactors=FALSE)
Sampledf<-read[,-1]
Variableset<-c("sp","Type","Maxtemp","Mintemp","Meantemp","MinPPT","Minelevation","Maxelevation","MinVPD")
Variableset2<-c("sp","Type","Meantemp","MinPPT","MinVPD","Maxelevation")#Noelevation
Sampledf<-Sampledf[,Variableset2]
###Hardwoods (the original label said "Conifers"; Clade below is correct)
Clade="Hardwoods"
OneClade<-Sampledf[Sampledf[,2]==Clade,]
tree.labels<-OneClade[,c("sp")]
OneCladeproc<-((OneClade[c(-1,-2)]))
Height<-dim(OneCladeproc)[1]
Width<-dim(OneCladeproc)[2]
t<-as.numeric(data.matrix(OneCladeproc))
print(t)
#seeds.data<-matrix(t,Width,Height)
# Standardize each variable (column-major slices).
row1<-t[c(1:Height)]
row1<- (row1-mean(row1))/sd(row1)
row2<-t[c((1+1*Height):(2*Height))]
row2<- (row2-mean(row2))/sd(row2)
row3<-t[c((1+2*Height):(3*Height))]
row3<- (row3-mean(row3))/sd(row3)
row4<-t[c((1+3*Height):(4*Height))]
row4<- (row4-mean(row4))/sd(row4)
seeds.data<-rbind(row1,row2,row3,row4)
seeds_data<-t(seeds.data)
names<-colnames(Sampledf)[c(-1,-2)]
##Set to three groups
# Smaller bandwidth and more iterations than the conifer run.
b=.440
b=c(rep(b,ncol(seeds_data)))
iter<-100000
MS_HWD<- meanShift(
seeds_data,
seeds_data,
bandwidth=b,
alpha=0,
iterations = iter
)
MS_HWD_assign <- MS_HWD$assignment
(print(MS_HWD_assign))
# value
MS_HWD_value <- MS_HWD$value
(print(MS_HWD_value))
## Mode values labeled with their variable names, for export.
MSOUTPUT<-rbind(Variableset2[c(-1,-2)],MS_HWD_value)
par( mfrow=c( 1,2) )
# Pairwise scatterplots colored by cluster.
for(i in 1:(length(names)-1)){
#jpeg(paste(Clade,i,".jpg",sep=""))
plot( seeds_data[,(i)], seeds_data[,(i+1)], col=MS_HWD$assignment,
xlab=names[(i)], ylab=names[(i+1)], main=paste(Clade,ncol(seeds_data),"Groups",sep=""),
cex=0.65, pch=16 )
#dev.off()
}
ms.labels6 <- MS_HWD$assignment
print( ms.labels6 )
ms.modes6 <- MS_HWD$value
print(ms.modes6)
# Attach cluster labels to the hardwood species table.
B<-cbind(OneClade,ms.labels6)
%%R
# Build the plotting frame for the hardwood clusters, run and plot the PCA,
# then expose the rotation/eigenvalues (csv exports left commented out).
seeds.plot<-data.frame(seeds_data)
colnames(seeds.plot)<-names
seeds.plot<-as.data.frame(cbind(seeds.plot,B$sp,as.character(B$ms.labels6)))
colnames(seeds.plot)[6]<-"MS"
colnames(seeds.plot)[5]<-"sp"
seeds.plot$MS<-as.factor(seeds.plot$MS)
seeds.plot$sp<-as.factor(seeds.plot$sp)
###Perform and plot PCA
PCA1<-prcomp(seeds.plot[,1:4])
#jpeg(paste(Clade,"PCA.jpg",sep=""))
autoplot(PCA1,data=seeds.plot,colour='MS',
label=TRUE,loadings=TRUE,
loading.label=TRUE,loadings.label.size=10,
frame=TRUE
)
#dev.off()
print(PCA1)
# Rotation (loadings) with the component standard deviations stacked on top.
PCAprint<-PCA1$rotation
EigenValues<-PCA1$sdev
PCAprint1<-rbind(EigenValues,PCAprint)
PCAprint1
#write.csv(PCAprint1,paste(Clade,"PCA1.csv",sep=""))
#write.csv(seeds.plot,paste(Clade,"FunctionalGroups.csv",sep=""))
#write.csv(MSOUTPUT,paste(Clade,"MS_Output.csv",sep=""))
I chose min PPT, min VPD, mean temp and mean elevation and ran a mean-shift algorithm at different bandwidths until I found 3-4 groups. For the conifers three groups fit well. For hardwoods there were a few species that fit no group. Because of how mean shift works, expanding groups to include them collapsed too much of the difference between groups — in essence you can have eight groups or one group. I assigned these species to the group closest to them, ending with four groups. List groups here.
%%R
#Set up the work drive
# Switch to the max-AGB working directory for the biomass analysis.
w_dir<-"E:/Species Imput Files_1_3/Max_AGB/"
setwd(w_dir)
This is a look up table for the species in the study area. Here I am just grabbing out species names
%%R
# Species lookup table: column 2 holds the FIA species code, column 13 the
# species name (used for plot titles below).
SPLUT<-read.csv("SpeciesLUT.csv")
uniquesp<-SPLUT[,2]
print(length(SPLUT[,1]))
# FIA codes of the focal species displayed later in the notebook.
Important_Species<-c(129,621,832,316,806,833,132,711,802,261)
In the following loop we fit, for each species, a linear regression of adjusted biomass against the species' share of stand AGB, and extrapolate to 100% dominance to estimate a likely maximum aboveground biomass.
%%R
# For each species: read its plot-level AGB table, keep the most dominant
# plots (top quartile of the biomass-to-percent ratio), regress adjusted
# biomass on percent-of-stand, and extrapolate to 100% dominance as the
# likely maximum AGB (g/m2). One diagnostic jpeg per species plus a summary csv.
#par(mfrow=(2,20))
df<-NULL
for(species in uniquesp){
#print(species)
Speciesname<-as.character(SPLUT[,13][SPLUT[,2]==species])
read<-read.csv(paste(w_dir,species,"AGB.csv",sep=""))
read<-as.data.frame(read[complete.cases(read),])
#uniquepercent<-unique(read$perecentofAGB)
# for(t in uniquepercent){
# agebreak<-read[read$perecentofAGB==t,]
# agebreak<-agebreak[agebreak$`AboveGround Carbon`> quantile(agebreak$`AboveGround Carbon`,probs=.80),]
# out<-rbind(agebreak,out)
#}
# Dominance ratio: species biomass per unit share of stand AGB.
read$ration<-(read$sumofsp/read$perecentofAGB)
read$ration[is.na(read$ration)]<-0.0
readquant<-read[read$ration > quantile(read$ration,probs=.75),]
readquant$adjsum<-readquant$sumofsp*0.112085   # lbs/acre -> g/m2
reg1<-lm(readquant$adjsum~readquant$perecentofAGB)
#max<-max(read$sumofsp[!is.na(read$sumofsp)])
maxplot<-max(readquant$sumofsp*0.112085)
#SteepestMax<-maxplot$sumofsp/maxplot$perecentofAGB
intercept<-as.numeric(reg1$coefficients[1])
slope<-as.numeric(reg1$coefficients[2])
likelymaximum<-((1.00)*slope)+intercept   # regression prediction at 100% of stand
#print(likelymaximum)#to## g per m2
jpeg(filename=(paste(species,"_svp.jpeg",sep="")))
plot(readquant$adjsum~readquant$perecentofAGB, main=paste(Speciesname,",","n=",length(readquant$sumofsp),",","Max AGB",round(likelymaximum,2))
,xlim=c(0,2.0),ylim=c(0,likelymaximum*1.5),xlab="Percent of Stand",ylab="Above ground biomass g/m2",col="Green",pch = 19,cex.main=2.0,cex.lab=1.4,cex.axis=1.2,cex=1.5,font=2)
abline(reg1)
abline(v=1.0,col="red")
abline(h=likelymaximum,col="red")
dev.off()
intercept<-as.numeric(reg1$coefficients[1])
slope<-as.numeric(reg1$coefficients[2])
#SteepestMax*.112085
A<-cbind(species,likelymaximum,maxplot,slope,intercept)
df<-rbind(A,df)
}
colnames(df)<-c("Species","Linear Regression(gm-2) at 125% ","Maximum Measured(gm-2)","Slope","Intercept")
# Bug fix: the original called write.csv(w_dir,"AGBlist.csv"), which saves
# the path string itself; write the results table into the working directory.
write.csv(df,paste(w_dir,"AGBlist.csv",sep=""))
# Display the per-species max-AGB regression plots generated above.
from matplotlib.pyplot import figure, imshow, axis
from matplotlib.image import imread
mypath='E:/Species Imput Files_1_3/Max_AGB'
hSize = 30
wSize = 50
col = 3
Important_Species=[129,621,832,316,806,833,132,711,802,261]
# Bug fix: the original hand-typed list repeated "832_svp.jpeg" twice;
# derive the filenames from Important_Species so the two cannot drift apart.
Growthcurveplot=["%d_svp.jpeg" % sp for sp in Important_Species]
showImagesMatrix(Growthcurveplot, hSize = 62, wSize = 20,col=2)
Now that we have seen values for the maximum amount of biomass, we want a better idea of what the growth curves should look like so that we can calibrate growth over time. To do this we use a site-index association, relating site index and height to age. We then look at the upper 20% of sites and fit a logarithmic regression to simulate the growth patterns of trees. This will be used as a basis for LANDIS-II trials to tune the rate of growth.
Importing libraries from R
# R packages for the FIA growth-curve analysis.
importr("plyr", lib_loc= "C:/Users/zjrobbin/Documents/R/win-library/3.4")
#importr("dplyr", lib_loc= "C:/Users/zjrobbin/Documents/R/win-library/3.4")
importr("sp", lib_loc= "C:/Users/zjrobbin/Documents/R/win-library/3.4")
#importr("magrittr", lib_loc= "C:/Users/zjrobbin/Documents/R/win-library/3.4")
Setting the Working Drive
%%R
library(plyr)
#library(dplyr)
library(sp)
library(raster); options(scipen=999) # scipen removes scientific notation from FIA data
#library(rgdal)
wdir<-"C:/Users/zjrobbin/Desktop/Growth_cruves/"
setwd(wdir)
%%R
# Unit conversions.
# NOTE(review): TPA.factor is defined here but not used in the visible code.
TPA.factor <- 6.018046 #trees/acre adjust (TPA in FIA)
conv.factor <- .112085 #Convert from lbs/acre to g/m-2
%%R
###The Output from IC 2 Get Age
# Load the FIA tree table with calculated ages and compute per-tree
# aboveground carbon adjusted by trees-per-acre and unit conversion.
AgeGraphs<-read.csv(paste(wdir,"All_Sapps_FIA_AGE_TREE_HT.csv",sep=""))
print(head(AgeGraphs))
Plts<-unique(AgeGraphs$PLT_CN)
unique(AgeGraphs$calc_age_rounded)
Plts<-unique(AgeGraphs$PLT_CN)
colnames(AgeGraphs)
AgeGraphs$AdjCarbon<-AgeGraphs$CARBON_AG*AgeGraphs$TPAGROW_UNADJ*conv.factor
output<-NULL
%%R
# Aggregate carbon by species and age within each FIA plot.
for(i in 1:length(Plts)){
One<-as.data.frame(AgeGraphs[AgeGraphs$PLT_CN==Plts[i],])
plt<-Plts[i]
IC<-aggregate(round(One$AdjCarbon),by=list(SPCD=One$SPCD,AGE=as.numeric(One$calc_age_rounded)),FUN=sum)
IC$PLT_CN<-plt
colnames(IC)<-c("SPCD","AGE","AboveGround Carbon","PLT_CN")
output<-rbind(IC,output)
}
print(output)
%%R
print(output)
# Persist the aggregate so the loop need not be re-run.
write.csv(output,paste(wdir,"FIA_Height_Carbon_Aggregate.csv",sep=""))
%%R
# Reload the aggregate and drop zero-carbon records.
output<-read.csv(paste(wdir,"FIA_Height_Carbon_Aggregate.csv",sep=""))
print(head(output))
SPLUT<-read.csv("SpeciesLUT.csv")
uniquesp<-SPLUT[,2]
uni.sp<-unique(output$SPCD)
output<-output[output$AboveGround.Carbon>0,]
#print(colnames(SPLUT))
print(colnames(output))
The following loop fits, for each species, a logarithmic growth curve of aboveground carbon against age, and writes one plot and one growth-rate table per species.
%%R
# For each species: if there are >100 records, keep only the top-20% carbon
# records within each age class, fit carbon ~ log(age), plot the points with
# the fitted confidence band, and write a per-species growth-rate csv.
for(i in uni.sp){
IC<-output[output$SPCD==i,]
#print("Species")
#print(i)
Speciesname<-as.character(SPLUT[,13][SPLUT[,2]==i])
out<-NULL
# Only filter to the upper quantile when the sample is large enough.
if(length(IC$AGE)>100){
uniqueage<-unique(IC$AGE)
for(t in uniqueage){
agebreak<-IC[IC$AGE==t,]
agebreak<-agebreak[agebreak$AboveGround.Carbon> quantile(agebreak$AboveGround.Carbon,probs=.80),]
out<-rbind(agebreak,out)
}
}
if(!is.null(out)){
IC<-out
}
#print(length(IC$AGE))
x<-IC$AGE
y<-IC$AboveGround.Carbon
filename<-paste(wdir,"Output/",i,"_GY_Curve.jpeg",sep="")
#print(filename)
jpeg(file = filename, bg = "transparent")
#plot(1:10)
#rect(1, 5, 3, 7, col = "white")
title<-paste(Speciesname, i, "n=",length(IC$AGE),"plots")
plot(y~x,main=title,xlim=c(0,200),ylim=c(0,4000),xlab="Age",ylab="Carbon g/m2",pch = 16,col="gray",cex.main=2.0,cex.lab=1.5,cex.axis=1.2,font=2)
# Logarithmic growth model; slope is the log-age coefficient.
fit<-lm(y~log(x))
summary(fit)
slope<-as.numeric(fit$coefficients[2])
lm(formula=y~log(x))
n=500
set.seed(10)
# Predict over ages 0..200.
# NOTE(review): at x=0, log(0)=-Inf makes the first prediction non-finite
# and g_m2_year below divides by zero — confirm how the first row of the
# GR_.csv is handled downstream.
x=seq(from=0,to=200,length.out=201)
y=predict(fit,newdata=list(x=seq(from=0,to=200,length.out=201)),
interval="confidence")
g_m2_year<-(slope/x)*5   # derivative of slope*log(x), scaled to a 5-year step
write<-(cbind(x,y,g_m2_year))
matlines(x,y,lwd=2)
dev.off()
x<-IC$AGE
y<-IC$AboveGround.Carbon
##Just plot important Species
write.csv(write,paste(wdir,"Output/",i,"GR_.csv",sep=""))
}
Here are graphs for the most prevalent species on the landscape. For each functional group we will use the two most prevalent species and find parameters that best align with the growth curve.
# Display the per-species growth curves generated above.
from matplotlib.pyplot import figure, imshow, axis
from matplotlib.image import imread
mypath='E:/Growth_cruves/Output'
hSize = 30
wSize = 50
col = 3
Important_Species=[129,621,832,316,806,833,132,711,802,261]
# Bug fix: the original hand-typed list repeated "832_GY_Curve.jpeg" twice;
# derive the filenames from Important_Species so the two cannot drift apart.
Growthcurveplot=["%d_GY_Curve.jpeg" % sp for sp in Important_Species]
showImagesMatrix(Growthcurveplot, hSize = 62, wSize = 20,col=2)
#Carbon
# Figures summarizing landscape carbon and nitrogen pools.
PATH = "E:/FS_meeting/FiguresMisc/"
Image(filename = PATH + "Carbon.jpg", width=1000, height=1000)
#Nitrogen
PATH = "E:/FS_meeting/FiguresMisc/"
Image(filename = PATH + "Nitrogen.jpg", width=1000, height=1000)
##Other Ecosystem stuff
mypath='E:/FS_meeting/FiguresMisc'
Growthcurveplot=["LAI.jpg","Eco.jpg"]
showImagesMatrix(Growthcurveplot, hSize = 100, wSize = 70,col=2)
# R packages and paths for the LANDIS-II single-cell calibration runs.
#importr("readtext", lib_loc= "C:/Users/zjrobbin/Documents/R/win-library/3.4")
importr("scales", lib_loc= "C:/Users/zjrobbin/Documents/R/win-library/3.4")
importr("RColorBrewer", lib_loc= "C:/Users/zjrobbin/Documents/R/win-library/3.4")
%%R
#library(readtext)
library(scales)
library(RColorBrewer)
# Paths: the LANDIS-II single-cell run, the output destination, and the
# FIA growth-curve data produced earlier.
LandisDrive<-"C:/Users/zjrobbin/Desktop/Sapps_SC/LANDIS_Sapps_Singlecell_Trial_SS/"
setwd(LandisDrive)##Wdir has to be landisDrive
writeDrive<-"C:/Users/zjrobbin/Desktop/Sapps_SC/SpeciesFiles/"
DataDrive<-"C:/Users/zjrobbin/Desktop/Growth_cruves/Output/"
display.brewer.all()
Using the growth curves and the values of previous LANDIS functional groups as an estimate, we will parameterize the functional groups. Here is an example of processing Quercus prinus (sp=832) in the Northern Hardwood group. Using the minimum positive value from the growth curve and its AGB value, I grow one cohort of that value with establishment turned to zero. First we adjust the LAI factors.
%%R
#Paramaters
# FIA species code being calibrated.
# NOTE(review): spp is "129" but RunName below says "QuerPrin" (832) —
# confirm which species this demo run targets.
spp<-"129"
Functional group LAI parameters: these affect AGB considerably and need to be parameterized first.
%%R
# Plot total LAI over time for the current calibration run and the previous
# one (Lastcal). redux shifts the log's row index to cohort age (+10).
Log<-"NECN-calibrate-log.csv"
Cal<-read.csv(paste(LandisDrive,Log,sep=""),row.names = NULL)
Cal$redux<-as.numeric(Cal$row.names)+10
Lastcal<-Cal###Remove after demo
plot(Cal$LAI~Cal$redux,main="LAI Total",xlab="Years", ylab="Total LAI",xlim=c(0,100),ylim=c(0,8),type="p",pch = 19,cex.main=2.0,cex.lab=1.35,cex.axis=1.2,cex=1.5,font=15,font.lab=15)
# Bug fix: removed a stray duplicated comma after ylim, which passed an
# empty argument into plot() and raises an "argument is missing" error.
plot(Lastcal$LAI~Lastcal$redux,main="Previous Run",xlab="Years", ylab="Total LAI",xlim=c(0,100),ylim=c(0,8),type="p",pch = 19,cex.main=2.0,cex.lab=1.45,cex.axis=1.2,cex=1.5,font=15,font.lab=15)
# Reference canopy-structure figure for the target forest type.
PATH = "C:/Users/zjrobbin/Desktop/Sapps_SC/PicsofTrees/"
Image(filename = PATH + "Oak_Hickory.png", width=500, height=500)
Citation: Hardiman, B. S., Gough, C. M., Halperin, A., Hofmeister, K. L., Nave, L. E., Bohrer, G., & Curtis, P. S. (2013). Maintaining high rates of carbon storage in old forests: a mechanism linking canopy structure to forest function. Forest Ecology and Management, 298, 111-119.
Once these are in the correct range we can work on tuning the aboveground biomass. Name<-"NHW" Index<-"4" PPDF1<-"25.0" PPDF2<-"40.0" PPDF3<-"2.05" ###beginning arc: higher is lower, .5-.05 PPDF4<-"5.0" ###long arc: higher is lower, whole number FCFRAC<-"0.48"
Using each run and an earlier run, we adjust the parameters to best match the aboveground AGB.
%%R
# Compare the LANDIS-II single-cell run against the FIA-derived growth
# curve for this species, side by side with the previous run.
RunName<-"QuerPrin4"
Log<-"NECN-succession-log.csv"
FileWrite<-paste(writeDrive,RunName,sep="")
#dir.create(FileWrite)
print("set Age at carbon")
NECNSuccession<-read.csv(paste(LandisDrive,Log,sep=""))
GrowthCurves<-read.csv(paste(DataDrive,spp,"GR_.csv",sep=""))
##
NECNSuccession<-read.csv(paste(LandisDrive,Log,sep=""))
GrowthCurves<-read.csv(paste(DataDrive,spp,"GR_.csv",sep=""))
# Age vs fitted carbon; the x2 presumably converts carbon to biomass — TODO confirm.
Growthxy<-GrowthCurves[,c(2,3)]
Growthxy$fit<-Growthxy$fit*2
NECNxy<-NECNSuccession[,c(1,7)]
# Align the LANDIS time axis to the first age with a positive fitted value.
Start<-Growthxy[Growthxy$fit>0,][1,]
Agestart<-Start[1,1]
NECNxy$Time<-NECNSuccession$Time+Agestart
#print(Start)
#par(mfrow=c(1,1))
#dev.new(width=5, height=4)
plot(NECNxy,col=alpha("darkorange"),xlim=c(10,120),ylim=c(0,10000),type="p",main="This Run",pch = 19,cex.main=2.0,cex.lab=1.4,cex.axis=1.2,cex=1.5,font=2)
points(Growthxy,pch = 19,type="p",col=alpha("darkgreen",0.8),cex=1.5)
legend("topleft",legend=c("Landis","FIA"),col=c("darkorange","darkgreen"),cex=1.0,pt.cex=1.0,pch=c(19,19))
filename<-paste(paste("Spp",spp,".png",sep=""))
#print(filename)
# NOTE(review): PreviousRunNECN/PreviousRunGrowth are assigned in the NEXT
# cell from the prior iteration, so this cell errors on a fresh first run.
plot(PreviousRunNECN,col=alpha("darkorange"),xlim=c(10,120),ylim=c(0,10000),type="p",main="Previous Run",pch = 19,cex.main=2.0,cex.lab=1.4,cex.axis=1.2,cex=1.5,font=2)
points(PreviousRunGrowth,pch = 19,type="p",col=alpha("darkgreen",0.8),cex=1.5)
legend("topleft",legend=c("Landis","FIA"),col=c("darkorange","darkgreen"),cex=1.0,pt.cex=1.0,pch=c(19,19))
%%R
# Snapshot this run as "previous" for the next comparison, save the
# comparison plot, and archive the NECN parameter file + succession log.
PreviousRunGrowth<-Growthxy
PreviousRunNECN<-NECNxy
jpeg(file =paste(writeDrive,"/",RunName,"/",RunName,".jpeg",sep=""), bg = "transparent")
plot(NECNxy,col="red",xlim=c(16,120),ylim=c(0,10000))
points(Growthxy)
legend("topleft",legend=c("Landis","FIA"),col=c("red","black"),cex=1.0,pt.cex=1.0,pch=c(1,1))
dev.off()
# NOTE(review): readtext() requires library(readtext), which is commented
# out in the setup cell above — load it before running this line.
object<-readtext(paste(LandisDrive,"Sapps_NECN_Values_SingleCell.txt",sep=""))
fileConn<-file(paste(writeDrive,RunName,"/",RunName,"LandscapeText.txt",sep=""))
writeLines(object[,2], fileConn)
close(fileConn)
write.csv(NECNSuccession,paste(writeDrive,RunName,"/",RunName,"sucesssion.csv",sep=""))
We can use the same data as before to compare to the NPP from our LANDIS run
%%R
# NPP over time: sum succession-log columns 8 and 9 and convert to
# t C ha-1 yr-1 (the 1/90.7185 factor — TODO confirm this conversion).
NEEC<-cbind(NECNSuccession[,(1)],(NECNSuccession[,(8)]+NECNSuccession[,(9)])*1/90.7185)
# Bug fix: removed a stray duplicated comma before main=, which passed an
# empty argument into plot() and raises an "argument is missing" error.
plot(NEEC,xlim=c(16,120),ylim=c(0,10),type="p",pch = 19,cex.main=2.0,cex.lab=1.45,cex.axis=1.2,cex=1.5,font=15,font.lab=15,main="NPP t C Ha-1 yr-1",xlab="Years", ylab="tC HA-1 yr-1")
# Reference figure for comparison with published oak-hickory values.
PATH = "C:/Users/zjrobbin/Desktop/Sapps_SC/PicsofTrees/"
Image(filename = PATH + "Oak_Hickory.png", width=500, height=500)
Take away: